# Notebook bootstrap: auto-reload edited project modules without restarting the kernel.
%load_ext autoreload
%autoreload 2
import numpy as np
import pickle
import os
import tensorflow as tf
# Echo the TensorFlow version (notebook cell output; this pipeline was run on 2.7.0).
tf.version.VERSION
'2.7.0'
# Require at least one visible GPU and enable on-demand memory growth so
# TensorFlow does not reserve all GPU memory up front.
physical_devices = tf.config.list_physical_devices('GPU')
assert(physical_devices)  # NOTE(review): the assert already guarantees a non-empty list, making the `if` below redundant
print(physical_devices)
if len(physical_devices):
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
# Directory where generated graphics and images are written.
output_dir = "./output"
# Fix the RNG seed for reproducible runs (seeds Python, NumPy and TensorFlow).
seed = 0
tf.keras.utils.set_random_seed(seed)
# Load the pickled traffic-sign dataset splits.
data_dir = "./traffic-signs-data"


def _read_pickle(path):
    """Deserialize one dataset split from *path*."""
    # NOTE(review): pickle.load on external files executes arbitrary code if the
    # files are untrusted — acceptable here only because the dataset is local.
    with open(path, mode='rb') as f:
        return pickle.load(f)


training_file = os.sep.join([data_dir, "train.p"])
validation_file = os.sep.join([data_dir, "valid.p"])
testing_file = os.sep.join([data_dir, "test.p"])
train = _read_pickle(training_file)
valid = _read_pickle(validation_file)
test = _read_pickle(testing_file)
# Each split is a dict; keep the raw images and their class ids.
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
The pickled data is a dictionary with 4 key/value pairs:
'features' is a 4D array containing the raw pixel data of the traffic sign images, (num examples, width, height, channels). 'labels' is a 1D array containing the label/class id of the traffic sign; the file signnames.csv contains the id -> name mapping for each id. 'sizes' is a list of tuples, (width, height), representing the original width and height of each image. 'coords' is a list of tuples, (x1, y1, x2, y2), representing the coordinates of a bounding box around the sign in the image. THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES.
# Number of training examples
# Basic statistics of the loaded splits.
n_train = len(X_train)           # number of training examples
n_validation = len(X_valid)      # number of validation examples
n_test = len(X_test)             # number of testing examples
image_shape = X_train[0].shape   # shape of a single traffic-sign image
n_classes = np.unique(y_train).size  # number of distinct labels in the training set
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
Number of training examples = 34799 Number of validation examples = 4410 Number of testing examples = 12630 Image data shape = (32, 32, 3) Number of classes = 43
import matplotlib.pyplot as plt
%matplotlib inline
Matplotlib created a temporary config/cache directory at /tmp/matplotlib-qxp0mbt9 because the default path (/.config/matplotlib) is not a writable directory; it is highly recommended to set the MPLCONFIGDIR environment variable to a writable directory, in particular to speed up the import of Matplotlib and to better support multiprocessing.
from traffic_sign_classifier.utils import group_by_category, load_signnames
# Loading the classes/categories names
class2label = load_signnames(os.sep.join([data_dir, "signnames.csv"]))
# by_category_sample = group_by_category(y_train)
os.path.join(output_dir, "images_preview.jpg")
'./output/images_preview.jpg'
from traffic_sign_classifier.visualization import grid_visu
grid_visu(X_train, labels=y_train, sample_size=5, categories_per_fig=9, # , categories=range(10),
label_to_name=class2label,
shuffle=True, output_dir_path="./output/previews")
# Group each split's sample indices by class label, keyed by split name.
category_grouped_datasets = {}
splits = (("train", y_train), ("test", y_test), ("validation", y_valid))
for split_name, split_labels in splits:
    print(f"{split_name} data set size -> {len(split_labels)}")
    category_grouped_datasets[split_name] = group_by_category(split_labels)
train data set size -> 34799 test data set size -> 12630 validation data set size -> 4410
# Categories/classes distribution (in train/validation/test)
from collections import defaultdict
from typing import Optional
def visualize_datasets_distribution(category_grouped_datasset_dict, output_dir:str =output_dir, output_file_name:Optional[str]=None):
    """A helper to visualize datasets distribution.

    Draws, on one figure, a horizontal bar chart of per-class sample counts for
    every dataset in `category_grouped_datasset_dict` (one bar group per class,
    one offset bar per dataset), then optionally saves the figure.

    Args:
        category_grouped_datasset_dict: mapping of dataset name -> {category: items}.
        output_dir: directory for the saved figure (default bound to the module
            global `output_dir` at definition time).
        output_file_name: figure is saved only when this is a str ending in ".png".

    Note: relies on the module-level `class2label` mapping for tick labels.
    """
    plt.figure(figsize=(10, 16))
    bar_width = 0.3
    width_offset = 0  # shifts each dataset's bars so they sit side by side
    for dataset, by_category in category_grouped_datasset_dict.items():
        # Plotting: classes sorted by ascending sample count of this dataset.
        categories, sizes = zip(*[(category, len(items)) for category, items in sorted(by_category.items(), key=lambda x: len(x[1]), reverse=False)])
        plt.barh([width_offset + item for item in range(len(categories))], sizes, height=bar_width,
                 # fall back to the raw category id when no name is known
                 tick_label=[class2label.get(item, item) for item in categories],
                 label=f"{dataset} dataset")
        width_offset += bar_width
    plt.legend()
    plt.title(f"Train/validation/test classes distribution")
    plt.grid()
    # Saving the figure to a file (file name should end with .png)
    if isinstance(output_file_name, str) and output_file_name.endswith(".png"):
        plt.savefig(os.path.join(output_dir,output_file_name))
visualize_datasets_distribution(category_grouped_datasets, output_file_name="classes_distribution.png")
Minimally, the image data should be normalized so that the data has mean zero and equal variance. An approximation is done by centering and scaling, implemented as preprocessing layers of the neural network (see the preprocessing_layers initialization in the Model architecture section).
Here we will augment the data by generating samples from random transformations applied to the classes with few training examples/samples.
Optionally, during training, data augmentation layers are used to alter the data, making the model a bit more robust to a range of transformations (see the training model section).
from functools import reduce
from sklearn.utils import resample
# Select categories with few examples to augment:
# pick every category below `category_size_threshold` examples in the training set and
# generate the complement up to `category_size_threshold` samples to add.
category_size_threshold = 600
# NOTE(review): the `if len(items) < category_size_threshold` filter is commented out,
# so EVERY category is resampled — large classes still receive max(200, ...) = 200
# extra samples, small ones are topped up toward `category_size_threshold`.
to_augment = {category: resample(items, n_samples=max(200, category_size_threshold-len(items)), replace=True, stratify=items) for category, items in
              sorted(category_grouped_datasets["train"].items(), key=lambda x: len(x[1])) # if len(items)< category_size_threshold
              }
# Flatten the per-category index lists into one list of training-set indices.
to_augment_items = reduce(lambda x, y: x+y, to_augment.values())
# Visualize items to augment (when only a subset of classes is chosen )
# grid_visu(X_train[to_augment_items], labels=y_train[to_augment_items], sample_size=5, categories_per_fig=5,
# label_to_name=class2label,
# shuffle=True, output_dir_path="./output/to_augment")
# Generate augmented (transformed data) dataset from the resampled data ( categories with few examples < category_size_threshold)
from tensorflow.keras.layers import RandomRotation, Rescaling, Resizing, GaussianNoise
from tensorflow.keras.layers.experimental.preprocessing import RandomCrop, CenterCrop
# Fix: RandomContrast was imported twice on this line; deduplicated.
from tensorflow.keras.layers import RandomTranslation, RandomContrast, RandomZoom
from tensorflow.keras import Sequential

# Define augmentation layers applied (training mode) to the resampled images.
augmentation_layers = [
    RandomRotation(.02),
    RandomTranslation(.1, .1),
    # NOTE(review): height_factor=(-0.1, -0.1) is a degenerate range — every image
    # gets the same 10% zoom-in. The in-model augmentation below uses (-0.1, 0.1);
    # confirm whether a fixed zoom was intended here.
    RandomZoom(height_factor=(-0.1, -0.1)),
    RandomContrast(.1)
]
augmentation = Sequential(augmentation_layers)
# Batched tf.data pipeline: slice the selected samples, transform them with the
# augmentation stack (training=True so the random layers are active), prefetch.
generated = (
    tf.data.Dataset.from_tensor_slices((X_train[to_augment_items], y_train[to_augment_items]))
    .batch(256)
    .map(lambda x, y: (augmentation(x, training=True), y), num_parallel_calls=tf.data.AUTOTUNE)
    .prefetch(tf.data.AUTOTUNE)
)
from collections import Counter

# Materialize the generated batches into numpy arrays while counting how many
# augmented samples each class received.
categories_counter = Counter()
gen_X = []
gen_y = []
for batch_X, batch_y in generated:
    images, labels = np.array(batch_X), np.array(batch_y)
    gen_X.append(images)
    gen_y.append(labels)
    categories_counter.update(labels)
gen_X = np.concatenate(gen_X)
gen_y = np.concatenate(gen_y)
print(f" augmentation -> {categories_counter}")
# Merge the generated samples with the original training data.
augmented_X = np.concatenate((X_train, gen_X))
augmented_y = np.concatenate((y_train, gen_y))
augmented_sample_per_category = Counter(augmented_y)
print(f" samples per class after augmentation -> {augmented_sample_per_category}")
# Sanity check: after the merge, every class holds at least `category_size_threshold` samples.
assert all(cnt >= category_size_threshold for cnt in augmented_sample_per_category.values())
augmentation -> Counter({37: 420, 19: 420, 0: 420, 41: 390, 42: 390, 32: 390, 27: 390, 29: 360, 24: 360, 39: 330, 21: 330, 40: 300, 20: 300, 36: 270, 22: 270, 16: 240, 34: 240, 6: 240, 30: 210, 23: 200, 28: 200, 26: 200, 15: 200, 33: 200, 31: 200, 14: 200, 17: 200, 35: 200, 18: 200, 11: 200, 3: 200, 8: 200, 7: 200, 9: 200, 25: 200, 5: 200, 4: 200, 10: 200, 38: 200, 12: 200, 13: 200, 1: 200, 2: 200})
samples per class after augmentation -> Counter({2: 2210, 1: 2180, 13: 2120, 12: 2090, 38: 2060, 10: 2000, 4: 1970, 5: 1850, 25: 1550, 9: 1520, 7: 1490, 3: 1460, 8: 1460, 11: 1370, 35: 1280, 18: 1280, 17: 1190, 31: 890, 14: 890, 33: 799, 26: 740, 15: 740, 28: 680, 23: 650, 41: 600, 36: 600, 40: 600, 22: 600, 37: 600, 16: 600, 19: 600, 42: 600, 0: 600, 32: 600, 27: 600, 29: 600, 24: 600, 34: 600, 6: 600, 30: 600, 39: 600, 21: 600, 20: 600})
# Re-group including the augmented training samples and re-plot the distribution.
category_grouped_datasets["train+augmented"] = group_by_category(augmented_y)
visualize_datasets_distribution(category_grouped_datasets, output_file_name="classes_distribution_with_augmented.png")
# Visual sanity check of the generated (augmented) images.
grid_visu(np.array(gen_X), labels=list(gen_y), sample_size=5, categories_per_fig=5,
          label_to_name=class2label,
          shuffle=True, output_dir_path="./output/augmentation_generation")
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the German Traffic Sign Dataset.
Model requirements:
To meet specifications, the validation set accuracy will need to be at least 0.93.
There are various aspects to consider when thinking about this problem:
An example of a published baseline model on this problem.
# from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import RandomRotation, Rescaling, Resizing, GaussianNoise
LeNet is chosen as the baseline architecture. Augmentation and slightly varied architectures are evaluated for performance gain.
from traffic_sign_classifier.nn.lenet import LeNet, ConvLayerConfig
from datetime import datetime
# Input preprocessing: resize to the 32x32 LeNet input, then map pixel values
# from [0, 255] to [-1, 1] (x / 127.5 - 1).
preprocessing_layers = [
    Resizing(32, 32),
    Rescaling(scale=1./127.5, offset=-1)
]
# Setting up networks (and models) to compare
# TODO: add a Gray-Level model
# TODO: use model name to name layers
logits = False # append a Softmax activation! set to True to keep the logits output
# Candidate architectures, keyed by model name. All share the same
# preprocessing stack; they differ in regularization, in-model augmentation,
# and convolution configuration.
networks = {
    # Plain LeNet: batch-norm and dropout explicitly disabled.
    "tsc_baseline": LeNet(categ_nbr=n_classes, logits=logits,
                          preprocessing_layers = preprocessing_layers,
                          name="tsc_baseline",
                          batch_norm=False,
                          dropout=0)
    ,
    # LeNet with batch-norm and dropout (presumably the LeNet defaults,
    # since the baseline above disables them explicitly — verify in lenet.py).
    "tsc_lenet_batch-norm_dropout": LeNet(categ_nbr=n_classes, logits=logits,
                                          preprocessing_layers = preprocessing_layers,
                                          name="tsc_lenet_batch-norm_dropout"),
    # Same, plus augmentation layers built into the model (active at training time).
    "tsc_lenet_always_augment": LeNet(categ_nbr=n_classes, logits=logits,
                                      preprocessing_layers = preprocessing_layers,
                                      augmentation_layers = [
                                          RandomRotation(.01),
                                          RandomTranslation(.05, .05),
                                          RandomZoom(height_factor=(-0.1, 0.1)),
                                          RandomContrast(.1)
                                      ],
                                      name="tsc_lenet_always_augment"),
    # Wider conv stacks: 12 then 32 filters, 5x5 kernels.
    "tsc_lenet_more_filters":
        LeNet(categ_nbr=n_classes, logits=logits,
              preprocessing_layers = preprocessing_layers,
              conv_layers_config = {
                  1: ConvLayerConfig(filters=12, kernel_size=(5, 5)),
                  2: ConvLayerConfig(filters=32, kernel_size=(5, 5)),
              },
              name="tsc_lenet_more_filters",
              ),
    # As above with 7x7 kernels.
    "tsc_lenet_7x7_more_filters":
        LeNet(categ_nbr=n_classes, logits=logits,
              preprocessing_layers = preprocessing_layers,
              conv_layers_config = {
                  1: ConvLayerConfig(filters=12, kernel_size=(7, 7)),
                  2: ConvLayerConfig(filters=32, kernel_size=(7, 7)),
              },
              name="tsc_lenet_7x7_more_filters",
              ),
    # 11x11 first-layer kernels, 7x7 second-layer kernels.
    "tsc_lenet_11x11_more_filters":
        LeNet(categ_nbr=n_classes, logits=logits,
              preprocessing_layers = preprocessing_layers,
              conv_layers_config = {
                  1: ConvLayerConfig(filters=12, kernel_size=(11, 11)),
                  2: ConvLayerConfig(filters=32, kernel_size=(7, 7)),
              },
              name="tsc_lenet_11x11_more_filters",
              )
}
# Include network name in layer naming
# TODO summary to md
def md_summary(model):
    """Quick architecture dump: print one line per layer, the layer's name
    immediately followed by its class (no separator, matching the original output)."""
    for layer in model.layers:
        print("".join((str(layer.name), str(layer.__class__))))
md_summary(networks.get("tsc_lenet_more_filters"))
conv2d_6<class 'keras.layers.convolutional.Conv2D'> batch_normalization_4<class 'keras.layers.normalization.batch_normalization.BatchNormalization'> max_pooling2d_6<class 'keras.layers.pooling.MaxPooling2D'> conv2d_7<class 'keras.layers.convolutional.Conv2D'> batch_normalization_5<class 'keras.layers.normalization.batch_normalization.BatchNormalization'> max_pooling2d_7<class 'keras.layers.pooling.MaxPooling2D'> flatten_3<class 'keras.layers.core.flatten.Flatten'> dropout_2<class 'keras.layers.core.dropout.Dropout'> dense_9<class 'keras.layers.core.dense.Dense'> dense_10<class 'keras.layers.core.dense.Dense'> dense_11<class 'keras.layers.core.dense.Dense'>
# Build every candidate network's Keras models.
for name in networks:
    network = networks[name]
    # Arbitrary spatial input size: the leading Resizing layer normalizes to 32x32.
    print(f"building (models) for {name}")
    network.build_models(input_=tf.keras.Input(shape=(None, None, 3)))
    network.model.summary()
    # network.pred_model.summary()
building (models) for tsc_baseline
Model: "tsc_baseline.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, None, None, 3)] 0
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d (Conv2D) (None, 28, 28, 6) 456
max_pooling2d (MaxPooling2D (None, 14, 14, 6) 0
)
conv2d_1 (Conv2D) (None, 10, 10, 16) 2416
max_pooling2d_1 (MaxPooling (None, 5, 5, 16) 0
2D)
flatten (Flatten) (None, 400) 0
dense (Dense) (None, 120) 48120
dense_1 (Dense) (None, 84) 10164
dense_2 (Dense) (None, 43) 3655
=================================================================
Total params: 64,811
Trainable params: 64,811
Non-trainable params: 0
_________________________________________________________________
building (models) for tsc_lenet_batch-norm_dropout
Model: "tsc_lenet_batch-norm_dropout.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, None, None, 3)] 0
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d_2 (Conv2D) (None, 28, 28, 6) 456
batch_normalization (BatchN (None, 28, 28, 6) 24
ormalization)
max_pooling2d_2 (MaxPooling (None, 14, 14, 6) 0
2D)
conv2d_3 (Conv2D) (None, 10, 10, 16) 2416
batch_normalization_1 (Batc (None, 10, 10, 16) 64
hNormalization)
max_pooling2d_3 (MaxPooling (None, 5, 5, 16) 0
2D)
flatten_1 (Flatten) (None, 400) 0
dropout (Dropout) (None, 400) 0
dense_3 (Dense) (None, 120) 48120
dense_4 (Dense) (None, 84) 10164
dense_5 (Dense) (None, 43) 3655
=================================================================
Total params: 64,899
Trainable params: 64,855
Non-trainable params: 44
_________________________________________________________________
building (models) for tsc_lenet_always_augment
Model: "tsc_lenet_always_augment.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, None, None, 3)] 0
random_rotation_1 (RandomRo (None, None, None, 3) 0
tation)
random_translation_1 (Rando (None, None, None, 3) 0
mTranslation)
random_zoom_1 (RandomZoom) (None, None, None, 3) 0
random_contrast_1 (RandomCo (None, None, None, 3) 0
ntrast)
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d_4 (Conv2D) (None, 28, 28, 6) 456
batch_normalization_2 (Batc (None, 28, 28, 6) 24
hNormalization)
max_pooling2d_4 (MaxPooling (None, 14, 14, 6) 0
2D)
conv2d_5 (Conv2D) (None, 10, 10, 16) 2416
batch_normalization_3 (Batc (None, 10, 10, 16) 64
hNormalization)
max_pooling2d_5 (MaxPooling (None, 5, 5, 16) 0
2D)
flatten_2 (Flatten) (None, 400) 0
dropout_1 (Dropout) (None, 400) 0
dense_6 (Dense) (None, 120) 48120
dense_7 (Dense) (None, 84) 10164
dense_8 (Dense) (None, 43) 3655
=================================================================
Total params: 64,899
Trainable params: 64,855
Non-trainable params: 44
_________________________________________________________________
building (models) for tsc_lenet_more_filters
Model: "tsc_lenet_more_filters.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, None, None, 3)] 0
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d_6 (Conv2D) (None, 28, 28, 12) 912
batch_normalization_4 (Batc (None, 28, 28, 12) 48
hNormalization)
max_pooling2d_6 (MaxPooling (None, 14, 14, 12) 0
2D)
conv2d_7 (Conv2D) (None, 10, 10, 32) 9632
batch_normalization_5 (Batc (None, 10, 10, 32) 128
hNormalization)
max_pooling2d_7 (MaxPooling (None, 5, 5, 32) 0
2D)
flatten_3 (Flatten) (None, 800) 0
dropout_2 (Dropout) (None, 800) 0
dense_9 (Dense) (None, 120) 96120
dense_10 (Dense) (None, 84) 10164
dense_11 (Dense) (None, 43) 3655
=================================================================
Total params: 120,659
Trainable params: 120,571
Non-trainable params: 88
_________________________________________________________________
building (models) for tsc_lenet_7x7_more_filters
Model: "tsc_lenet_7x7_more_filters.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, None, None, 3)] 0
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d_8 (Conv2D) (None, 26, 26, 12) 1776
batch_normalization_6 (Batc (None, 26, 26, 12) 48
hNormalization)
max_pooling2d_8 (MaxPooling (None, 13, 13, 12) 0
2D)
conv2d_9 (Conv2D) (None, 7, 7, 32) 18848
batch_normalization_7 (Batc (None, 7, 7, 32) 128
hNormalization)
max_pooling2d_9 (MaxPooling (None, 3, 3, 32) 0
2D)
flatten_4 (Flatten) (None, 288) 0
dropout_3 (Dropout) (None, 288) 0
dense_12 (Dense) (None, 120) 34680
dense_13 (Dense) (None, 84) 10164
dense_14 (Dense) (None, 43) 3655
=================================================================
Total params: 69,299
Trainable params: 69,211
Non-trainable params: 88
_________________________________________________________________
building (models) for tsc_lenet_11x11_more_filters
Model: "tsc_lenet_11x11_more_filters.training"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_6 (InputLayer) [(None, None, None, 3)] 0
resizing (Resizing) (None, 32, 32, 3) 0
rescaling (Rescaling) (None, 32, 32, 3) 0
conv2d_10 (Conv2D) (None, 22, 22, 12) 4368
batch_normalization_8 (Batc (None, 22, 22, 12) 48
hNormalization)
max_pooling2d_10 (MaxPoolin (None, 11, 11, 12) 0
g2D)
conv2d_11 (Conv2D) (None, 5, 5, 32) 18848
batch_normalization_9 (Batc (None, 5, 5, 32) 128
hNormalization)
max_pooling2d_11 (MaxPoolin (None, 2, 2, 32) 0
g2D)
flatten_5 (Flatten) (None, 128) 0
dropout_4 (Dropout) (None, 128) 0
dense_15 (Dense) (None, 120) 15480
dense_16 (Dense) (None, 84) 10164
dense_17 (Dense) (None, 43) 3655
=================================================================
Total params: 52,691
Trainable params: 52,603
Non-trainable params: 88
_________________________________________________________________
# NOTE(review): a single Adam instance is shared by every compiled model. Keras
# optimizers hold per-variable slot state, so sharing one optimizer across
# independently-trained models is fragile — consider one optimizer per model.
# NOTE(review): epsilon=None presumably falls back to a backend default — confirm
# this is accepted by the Keras version in use (TF 2.7 defaults epsilon to 1e-7).
optimizer = Adam(learning_rate=0.002, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
for _, network in networks.items():
    network.compile_model(optimizer=optimizer, metrics=["accuracy"])
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation sets implies underfitting. A high accuracy on the training set but a low accuracy on the validation set implies overfitting.
assert(set(augmented_y)==set(y_valid)) # a sanity check that all the labels appear in both train and validation
# One-hot encode the labels for the categorical cross-entropy loss.
y_train_categorical = to_categorical(augmented_y)
y_valid_categorical = to_categorical(y_valid)
# Sanity check: both encodings span the same number of classes.
assert y_train_categorical.shape[1] == y_valid_categorical.shape[1]
# Notebook-echoed shape of the one-hot training labels.
y_train_categorical.shape
(45869, 43)
from sklearn.utils import shuffle
# Shuffle the (augmented) training data once up front, deterministically;
# fit() is later called with shuffle=False.
shuffled_X_train, shuffled_y_train = shuffle(augmented_X, y_train_categorical, random_state=0)
# TensorBoard setup: one timestamped run directory per notebook execution.
run_id = datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join("logs", run_id)
# NOTE(review): this single callback/logdir is shared by all the networks trained
# below, so their curves land in the same run directory — confirm this is intended.
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
# Early stopping: stop when val_accuracy has not improved for 10 epochs and
# restore the weights of the best epoch.
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
    monitor="val_accuracy",
    min_delta=0,
    patience=10,
    verbose=1,
    mode="auto",
    baseline=None,
    restore_best_weights=True,
)
from collections import defaultdict

# Per-network training history: each entry accumulates one History object per
# fit() round, which supports interactive training (multiple rounds of epochs).
# Idiom fix: `defaultdict(list)` instead of `defaultdict(lambda: [])`.
history = defaultdict(list)
EPOCHS = 50
BATCH_SIZE = 256  # adjust here to vary the batch size
# Train all the test networks (sequentially), accumulating each fit() History.
for name, network in networks.items():
    print(f"training -> {name}")
    history[name].append(network.model.fit(shuffled_X_train, shuffled_y_train,
                         validation_data=(X_valid, y_valid_categorical), shuffle=False, # data already shuffled externally (sklearn shuffle above)
                         epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=[tensorboard_callback, early_stopping_callback]))
training -> tsc_baseline Epoch 1/50 180/180 [==============================] - 2s 4ms/step - loss: 1.7033 - accuracy: 0.5289 - val_loss: 0.7929 - val_accuracy: 0.7660 Epoch 2/50 180/180 [==============================] - 1s 3ms/step - loss: 0.5396 - accuracy: 0.8497 - val_loss: 0.4859 - val_accuracy: 0.8728 Epoch 3/50 180/180 [==============================] - 1s 3ms/step - loss: 0.3029 - accuracy: 0.9167 - val_loss: 0.4324 - val_accuracy: 0.8961 Epoch 4/50 180/180 [==============================] - 1s 3ms/step - loss: 0.2031 - accuracy: 0.9435 - val_loss: 0.4482 - val_accuracy: 0.9063 Epoch 5/50 180/180 [==============================] - 1s 3ms/step - loss: 0.1476 - accuracy: 0.9580 - val_loss: 0.4235 - val_accuracy: 0.9134 Epoch 6/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1135 - accuracy: 0.9670 - val_loss: 0.4648 - val_accuracy: 0.8989 Epoch 7/50 180/180 [==============================] - 1s 3ms/step - loss: 0.0929 - accuracy: 0.9726 - val_loss: 0.5277 - val_accuracy: 0.9032 Epoch 8/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0767 - accuracy: 0.9774 - val_loss: 0.4380 - val_accuracy: 0.9195 Epoch 9/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0676 - accuracy: 0.9789 - val_loss: 0.5197 - val_accuracy: 0.9150 Epoch 10/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0575 - accuracy: 0.9826 - val_loss: 0.4952 - val_accuracy: 0.9082 Epoch 11/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0523 - accuracy: 0.9841 - val_loss: 0.5050 - val_accuracy: 0.9141 Epoch 12/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0477 - accuracy: 0.9856 - val_loss: 0.6813 - val_accuracy: 0.8948 Epoch 13/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0408 - accuracy: 0.9874 - val_loss: 0.6987 - val_accuracy: 0.9050 Epoch 14/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0406 - accuracy: 0.9879 - val_loss: 0.7976 - 
val_accuracy: 0.9127 Epoch 15/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0508 - accuracy: 0.9842 - val_loss: 0.6582 - val_accuracy: 0.9091 Epoch 16/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0335 - accuracy: 0.9892 - val_loss: 0.6023 - val_accuracy: 0.9161 Epoch 17/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0342 - accuracy: 0.9891 - val_loss: 0.5780 - val_accuracy: 0.9315 Epoch 18/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0270 - accuracy: 0.9915 - val_loss: 0.7229 - val_accuracy: 0.9193 Epoch 19/50 180/180 [==============================] - 1s 3ms/step - loss: 0.0341 - accuracy: 0.9893 - val_loss: 0.7549 - val_accuracy: 0.9063 Epoch 20/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0250 - accuracy: 0.9918 - val_loss: 0.6396 - val_accuracy: 0.9238 Epoch 21/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0338 - accuracy: 0.9889 - val_loss: 0.8246 - val_accuracy: 0.9136 Epoch 22/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0271 - accuracy: 0.9916 - val_loss: 0.6635 - val_accuracy: 0.9134 Epoch 23/50 180/180 [==============================] - 1s 3ms/step - loss: 0.0301 - accuracy: 0.9902 - val_loss: 0.7937 - val_accuracy: 0.9125 Epoch 24/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0230 - accuracy: 0.9925 - val_loss: 0.9032 - val_accuracy: 0.9202 Epoch 25/50 180/180 [==============================] - 1s 3ms/step - loss: 0.0315 - accuracy: 0.9906 - val_loss: 0.8038 - val_accuracy: 0.9218 Epoch 26/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0335 - accuracy: 0.9899 - val_loss: 1.1420 - val_accuracy: 0.9082 Epoch 27/50 179/180 [============================>.] - ETA: 0s - loss: 0.0237 - accuracy: 0.9921Restoring model weights from the end of the best epoch: 17. 
180/180 [==============================] - 1s 5ms/step - loss: 0.0237 - accuracy: 0.9921 - val_loss: 1.0438 - val_accuracy: 0.9243 Epoch 00027: early stopping training -> tsc_lenet_batch-norm_dropout Epoch 1/50 180/180 [==============================] - 1s 5ms/step - loss: 1.0370 - accuracy: 0.6937 - val_loss: 0.7757 - val_accuracy: 0.7728 Epoch 2/50 180/180 [==============================] - 1s 4ms/step - loss: 0.3040 - accuracy: 0.9052 - val_loss: 0.4972 - val_accuracy: 0.8594 Epoch 3/50 180/180 [==============================] - 1s 5ms/step - loss: 0.1955 - accuracy: 0.9371 - val_loss: 0.3867 - val_accuracy: 0.9020 Epoch 4/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1439 - accuracy: 0.9531 - val_loss: 0.4325 - val_accuracy: 0.8984 Epoch 5/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1195 - accuracy: 0.9612 - val_loss: 0.7675 - val_accuracy: 0.8503 Epoch 6/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0991 - accuracy: 0.9677 - val_loss: 0.4404 - val_accuracy: 0.9102 Epoch 7/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0785 - accuracy: 0.9745 - val_loss: 0.4717 - val_accuracy: 0.8932 Epoch 8/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0812 - accuracy: 0.9734 - val_loss: 0.4168 - val_accuracy: 0.9159 Epoch 9/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0707 - accuracy: 0.9771 - val_loss: 0.5906 - val_accuracy: 0.8900 Epoch 10/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0658 - accuracy: 0.9783 - val_loss: 0.3673 - val_accuracy: 0.9213 Epoch 11/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0572 - accuracy: 0.9808 - val_loss: 0.3884 - val_accuracy: 0.9197 Epoch 12/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0554 - accuracy: 0.9810 - val_loss: 0.4484 - val_accuracy: 0.9143 Epoch 13/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0546 - 
accuracy: 0.9818 - val_loss: 0.5174 - val_accuracy: 0.9127 Epoch 14/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0504 - accuracy: 0.9836 - val_loss: 0.5663 - val_accuracy: 0.9007 Epoch 15/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0494 - accuracy: 0.9836 - val_loss: 0.6257 - val_accuracy: 0.8909 Epoch 16/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0578 - accuracy: 0.9817 - val_loss: 0.4560 - val_accuracy: 0.9218 Epoch 17/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0440 - accuracy: 0.9856 - val_loss: 0.4929 - val_accuracy: 0.9170 Epoch 18/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0418 - accuracy: 0.9860 - val_loss: 0.4526 - val_accuracy: 0.9209 Epoch 19/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0365 - accuracy: 0.9878 - val_loss: 0.5222 - val_accuracy: 0.9104 Epoch 20/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0386 - accuracy: 0.9871 - val_loss: 0.4624 - val_accuracy: 0.9179 Epoch 21/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0396 - accuracy: 0.9871 - val_loss: 0.4926 - val_accuracy: 0.9202 Epoch 22/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0364 - accuracy: 0.9875 - val_loss: 0.6446 - val_accuracy: 0.9075 Epoch 23/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0391 - accuracy: 0.9872 - val_loss: 0.6373 - val_accuracy: 0.9095 Epoch 24/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0368 - accuracy: 0.9882 - val_loss: 0.6861 - val_accuracy: 0.8934 Epoch 25/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0342 - accuracy: 0.9888 - val_loss: 0.5175 - val_accuracy: 0.9195 Epoch 26/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0364 - accuracy: 0.9879 - val_loss: 0.4603 - val_accuracy: 0.9265 Epoch 27/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0365 
- accuracy: 0.9881 - val_loss: 0.4279 - val_accuracy: 0.9324 Epoch 28/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0307 - accuracy: 0.9898 - val_loss: 0.4289 - val_accuracy: 0.9283 Epoch 29/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0320 - accuracy: 0.9895 - val_loss: 0.4435 - val_accuracy: 0.9286 Epoch 30/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0306 - accuracy: 0.9901 - val_loss: 0.4977 - val_accuracy: 0.9252 Epoch 31/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0327 - accuracy: 0.9888 - val_loss: 0.6590 - val_accuracy: 0.9138 Epoch 32/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0269 - accuracy: 0.9913 - val_loss: 0.4552 - val_accuracy: 0.9345 Epoch 33/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0316 - accuracy: 0.9896 - val_loss: 0.4639 - val_accuracy: 0.9297 Epoch 34/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0347 - accuracy: 0.9893 - val_loss: 0.4672 - val_accuracy: 0.9293 Epoch 35/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0307 - accuracy: 0.9903 - val_loss: 0.4410 - val_accuracy: 0.9329 Epoch 36/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0258 - accuracy: 0.9916 - val_loss: 0.5487 - val_accuracy: 0.9179 Epoch 37/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0259 - accuracy: 0.9916 - val_loss: 0.4764 - val_accuracy: 0.9254 Epoch 38/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0260 - accuracy: 0.9919 - val_loss: 0.5861 - val_accuracy: 0.9186 Epoch 39/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0267 - accuracy: 0.9913 - val_loss: 0.6663 - val_accuracy: 0.9077 Epoch 40/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0228 - accuracy: 0.9923 - val_loss: 0.5609 - val_accuracy: 0.9093 Epoch 41/50 180/180 [==============================] - 1s 4ms/step - loss: 
0.0294 - accuracy: 0.9907 - val_loss: 0.5047 - val_accuracy: 0.9315 Epoch 42/50 174/180 [============================>.] - ETA: 0s - loss: 0.0289 - accuracy: 0.9903Restoring model weights from the end of the best epoch: 32. 180/180 [==============================] - 1s 6ms/step - loss: 0.0289 - accuracy: 0.9903 - val_loss: 0.5576 - val_accuracy: 0.9259 Epoch 00042: early stopping training -> tsc_lenet_always_augment Epoch 1/50 180/180 [==============================] - 2s 9ms/step - loss: 1.3660 - accuracy: 0.5858 - val_loss: 1.2543 - val_accuracy: 0.6338 Epoch 2/50 180/180 [==============================] - 2s 9ms/step - loss: 0.4921 - accuracy: 0.8435 - val_loss: 0.6048 - val_accuracy: 0.8132 Epoch 3/50 180/180 [==============================] - 1s 8ms/step - loss: 0.3364 - accuracy: 0.8931 - val_loss: 0.4406 - val_accuracy: 0.8730 Epoch 4/50 180/180 [==============================] - 1s 8ms/step - loss: 0.2576 - accuracy: 0.9190 - val_loss: 0.4382 - val_accuracy: 0.8803 Epoch 5/50 180/180 [==============================] - 2s 9ms/step - loss: 0.2173 - accuracy: 0.9296 - val_loss: 0.4132 - val_accuracy: 0.8964 Epoch 6/50 180/180 [==============================] - 1s 8ms/step - loss: 0.1900 - accuracy: 0.9401 - val_loss: 0.4220 - val_accuracy: 0.8982 Epoch 7/50 180/180 [==============================] - 1s 7ms/step - loss: 0.1600 - accuracy: 0.9488 - val_loss: 0.5418 - val_accuracy: 0.8825 Epoch 8/50 180/180 [==============================] - 1s 8ms/step - loss: 0.1526 - accuracy: 0.9518 - val_loss: 0.4190 - val_accuracy: 0.8966 Epoch 9/50 180/180 [==============================] - 1s 7ms/step - loss: 0.1352 - accuracy: 0.9554 - val_loss: 0.5280 - val_accuracy: 0.8864 Epoch 10/50 180/180 [==============================] - 1s 8ms/step - loss: 0.1317 - accuracy: 0.9579 - val_loss: 0.4320 - val_accuracy: 0.8961 Epoch 11/50 180/180 [==============================] - 1s 8ms/step - loss: 0.1226 - accuracy: 0.9606 - val_loss: 0.4788 - val_accuracy: 0.9016 Epoch 12/50 
180/180 [==============================] - 1s 8ms/step - loss: 0.1130 - accuracy: 0.9634 - val_loss: 0.4616 - val_accuracy: 0.9027 Epoch 13/50 180/180 [==============================] - 1s 7ms/step - loss: 0.1149 - accuracy: 0.9632 - val_loss: 0.4046 - val_accuracy: 0.9048 Epoch 14/50 180/180 [==============================] - 1s 7ms/step - loss: 0.1047 - accuracy: 0.9660 - val_loss: 0.3931 - val_accuracy: 0.9039 Epoch 15/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0992 - accuracy: 0.9692 - val_loss: 0.3959 - val_accuracy: 0.9079 Epoch 16/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0975 - accuracy: 0.9692 - val_loss: 0.3418 - val_accuracy: 0.9197 Epoch 17/50 180/180 [==============================] - 2s 8ms/step - loss: 0.0965 - accuracy: 0.9685 - val_loss: 0.4599 - val_accuracy: 0.9084 Epoch 18/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0924 - accuracy: 0.9702 - val_loss: 0.3570 - val_accuracy: 0.9120 Epoch 19/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0910 - accuracy: 0.9706 - val_loss: 0.3506 - val_accuracy: 0.9179 Epoch 20/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0820 - accuracy: 0.9740 - val_loss: 0.4246 - val_accuracy: 0.9113 Epoch 21/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0847 - accuracy: 0.9730 - val_loss: 0.4215 - val_accuracy: 0.9170 Epoch 22/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0875 - accuracy: 0.9718 - val_loss: 0.3377 - val_accuracy: 0.9222 Epoch 23/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0803 - accuracy: 0.9740 - val_loss: 0.4698 - val_accuracy: 0.9091 Epoch 24/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0772 - accuracy: 0.9751 - val_loss: 0.4093 - val_accuracy: 0.9220 Epoch 25/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0746 - accuracy: 0.9760 - val_loss: 0.3976 - val_accuracy: 0.9261 Epoch 
26/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0776 - accuracy: 0.9763 - val_loss: 0.3429 - val_accuracy: 0.9361 Epoch 27/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0700 - accuracy: 0.9774 - val_loss: 0.3647 - val_accuracy: 0.9206 Epoch 28/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0713 - accuracy: 0.9765 - val_loss: 0.3077 - val_accuracy: 0.9351 Epoch 29/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0709 - accuracy: 0.9774 - val_loss: 0.3571 - val_accuracy: 0.9261 Epoch 30/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0694 - accuracy: 0.9784 - val_loss: 0.3414 - val_accuracy: 0.9331 Epoch 31/50 180/180 [==============================] - 2s 9ms/step - loss: 0.0623 - accuracy: 0.9798 - val_loss: 0.4818 - val_accuracy: 0.9249 Epoch 32/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0698 - accuracy: 0.9775 - val_loss: 0.5756 - val_accuracy: 0.9118 Epoch 33/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0653 - accuracy: 0.9790 - val_loss: 0.4262 - val_accuracy: 0.9268 Epoch 34/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0670 - accuracy: 0.9789 - val_loss: 0.5012 - val_accuracy: 0.9195 Epoch 35/50 180/180 [==============================] - 1s 8ms/step - loss: 0.0615 - accuracy: 0.9802 - val_loss: 0.3817 - val_accuracy: 0.9179 Epoch 36/50 172/180 [===========================>..] - ETA: 0s - loss: 0.0610 - accuracy: 0.9809Restoring model weights from the end of the best epoch: 26. 
180/180 [==============================] - 1s 7ms/step - loss: 0.0608 - accuracy: 0.9810 - val_loss: 0.3998 - val_accuracy: 0.9299 Epoch 00036: early stopping training -> tsc_lenet_more_filters Epoch 1/50 180/180 [==============================] - 1s 5ms/step - loss: 1.2613 - accuracy: 0.6337 - val_loss: 0.9318 - val_accuracy: 0.7025 Epoch 2/50 180/180 [==============================] - 1s 5ms/step - loss: 0.2869 - accuracy: 0.9095 - val_loss: 0.4875 - val_accuracy: 0.8721 Epoch 3/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1550 - accuracy: 0.9503 - val_loss: 0.5154 - val_accuracy: 0.8676 Epoch 4/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1095 - accuracy: 0.9647 - val_loss: 0.4207 - val_accuracy: 0.8980 Epoch 5/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0831 - accuracy: 0.9725 - val_loss: 0.4073 - val_accuracy: 0.9100 Epoch 6/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0601 - accuracy: 0.9805 - val_loss: 0.4279 - val_accuracy: 0.9063 Epoch 7/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0600 - accuracy: 0.9799 - val_loss: 0.5535 - val_accuracy: 0.9029 Epoch 8/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0534 - accuracy: 0.9822 - val_loss: 0.5435 - val_accuracy: 0.8898 Epoch 9/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0454 - accuracy: 0.9847 - val_loss: 0.4991 - val_accuracy: 0.9048 Epoch 10/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0332 - accuracy: 0.9889 - val_loss: 0.5118 - val_accuracy: 0.9000 Epoch 11/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0366 - accuracy: 0.9875 - val_loss: 0.4596 - val_accuracy: 0.9138 Epoch 12/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0336 - accuracy: 0.9886 - val_loss: 0.5509 - val_accuracy: 0.9084 Epoch 13/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0264 - accuracy: 
0.9914 - val_loss: 0.5085 - val_accuracy: 0.9170 Epoch 14/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0291 - accuracy: 0.9900 - val_loss: 0.5081 - val_accuracy: 0.9098 Epoch 15/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0300 - accuracy: 0.9904 - val_loss: 0.4645 - val_accuracy: 0.9222 Epoch 16/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0313 - accuracy: 0.9901 - val_loss: 0.5429 - val_accuracy: 0.9109 Epoch 17/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0254 - accuracy: 0.9917 - val_loss: 0.5461 - val_accuracy: 0.9045 Epoch 18/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0256 - accuracy: 0.9915 - val_loss: 0.4104 - val_accuracy: 0.9297 Epoch 19/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0263 - accuracy: 0.9911 - val_loss: 0.4808 - val_accuracy: 0.9188 Epoch 20/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0244 - accuracy: 0.9922 - val_loss: 0.6218 - val_accuracy: 0.9061 Epoch 21/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0256 - accuracy: 0.9918 - val_loss: 0.3997 - val_accuracy: 0.9354 Epoch 22/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0237 - accuracy: 0.9920 - val_loss: 0.5523 - val_accuracy: 0.9168 Epoch 23/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0237 - accuracy: 0.9925 - val_loss: 0.6196 - val_accuracy: 0.9104 Epoch 24/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0187 - accuracy: 0.9936 - val_loss: 0.5682 - val_accuracy: 0.9231 Epoch 25/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0237 - accuracy: 0.9926 - val_loss: 0.7426 - val_accuracy: 0.8912 Epoch 26/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0209 - accuracy: 0.9934 - val_loss: 0.6379 - val_accuracy: 0.9136 Epoch 27/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0260 - 
accuracy: 0.9915 - val_loss: 0.5095 - val_accuracy: 0.9358 Epoch 28/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0172 - accuracy: 0.9942 - val_loss: 0.4996 - val_accuracy: 0.9231 Epoch 29/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0154 - accuracy: 0.9952 - val_loss: 0.7350 - val_accuracy: 0.8971 Epoch 30/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0196 - accuracy: 0.9936 - val_loss: 0.7454 - val_accuracy: 0.9057 Epoch 31/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0261 - accuracy: 0.9924 - val_loss: 0.5889 - val_accuracy: 0.9240 Epoch 32/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0158 - accuracy: 0.9950 - val_loss: 0.5887 - val_accuracy: 0.9161 Epoch 33/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0222 - accuracy: 0.9927 - val_loss: 0.4610 - val_accuracy: 0.9340 Epoch 34/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0129 - accuracy: 0.9959 - val_loss: 0.5752 - val_accuracy: 0.9240 Epoch 35/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0121 - accuracy: 0.9957 - val_loss: 0.5812 - val_accuracy: 0.9163 Epoch 36/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0149 - accuracy: 0.9951 - val_loss: 0.5443 - val_accuracy: 0.9329 Epoch 37/50 177/180 [============================>.] - ETA: 0s - loss: 0.0227 - accuracy: 0.9930Restoring model weights from the end of the best epoch: 27. 
180/180 [==============================] - 1s 6ms/step - loss: 0.0226 - accuracy: 0.9930 - val_loss: 0.4313 - val_accuracy: 0.9356 Epoch 00037: early stopping training -> tsc_lenet_7x7_more_filters Epoch 1/50 180/180 [==============================] - 1s 5ms/step - loss: 1.1082 - accuracy: 0.6777 - val_loss: 0.5419 - val_accuracy: 0.8229 Epoch 2/50 180/180 [==============================] - 1s 6ms/step - loss: 0.2867 - accuracy: 0.9122 - val_loss: 0.3470 - val_accuracy: 0.9077 Epoch 3/50 180/180 [==============================] - 1s 6ms/step - loss: 0.1859 - accuracy: 0.9420 - val_loss: 0.2841 - val_accuracy: 0.9186 Epoch 4/50 180/180 [==============================] - 1s 6ms/step - loss: 0.1382 - accuracy: 0.9568 - val_loss: 0.3459 - val_accuracy: 0.9147 Epoch 5/50 180/180 [==============================] - 1s 5ms/step - loss: 0.1078 - accuracy: 0.9653 - val_loss: 0.4020 - val_accuracy: 0.9034 Epoch 6/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0947 - accuracy: 0.9689 - val_loss: 0.3709 - val_accuracy: 0.9127 Epoch 7/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0819 - accuracy: 0.9729 - val_loss: 0.3734 - val_accuracy: 0.9195 Epoch 8/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0692 - accuracy: 0.9772 - val_loss: 0.3238 - val_accuracy: 0.9277 Epoch 9/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0643 - accuracy: 0.9787 - val_loss: 0.3112 - val_accuracy: 0.9293 Epoch 10/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0569 - accuracy: 0.9816 - val_loss: 0.4128 - val_accuracy: 0.9181 Epoch 11/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0505 - accuracy: 0.9838 - val_loss: 0.3798 - val_accuracy: 0.9299 Epoch 12/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0451 - accuracy: 0.9851 - val_loss: 0.3253 - val_accuracy: 0.9297 Epoch 13/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0541 - accuracy: 
0.9819 - val_loss: 0.3005 - val_accuracy: 0.9376 Epoch 14/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0457 - accuracy: 0.9848 - val_loss: 0.4086 - val_accuracy: 0.9274 Epoch 15/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0386 - accuracy: 0.9871 - val_loss: 0.3206 - val_accuracy: 0.9376 Epoch 16/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0364 - accuracy: 0.9881 - val_loss: 0.3029 - val_accuracy: 0.9404 Epoch 17/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0364 - accuracy: 0.9883 - val_loss: 0.3699 - val_accuracy: 0.9243 Epoch 18/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0461 - accuracy: 0.9854 - val_loss: 0.3067 - val_accuracy: 0.9331 Epoch 19/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0332 - accuracy: 0.9892 - val_loss: 0.4030 - val_accuracy: 0.9265 Epoch 20/50 180/180 [==============================] - 1s 7ms/step - loss: 0.0352 - accuracy: 0.9882 - val_loss: 0.4647 - val_accuracy: 0.9277 Epoch 21/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0331 - accuracy: 0.9897 - val_loss: 0.4431 - val_accuracy: 0.9236 Epoch 22/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0324 - accuracy: 0.9892 - val_loss: 0.2685 - val_accuracy: 0.9494 Epoch 23/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0261 - accuracy: 0.9914 - val_loss: 0.4718 - val_accuracy: 0.9247 Epoch 24/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0273 - accuracy: 0.9915 - val_loss: 0.4761 - val_accuracy: 0.9222 Epoch 25/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0426 - accuracy: 0.9865 - val_loss: 0.3959 - val_accuracy: 0.9336 Epoch 26/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0302 - accuracy: 0.9900 - val_loss: 0.2182 - val_accuracy: 0.9587 Epoch 27/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0247 - 
accuracy: 0.9924 - val_loss: 0.4350 - val_accuracy: 0.9320 Epoch 28/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0253 - accuracy: 0.9917 - val_loss: 0.4008 - val_accuracy: 0.9313 Epoch 29/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0300 - accuracy: 0.9904 - val_loss: 0.4023 - val_accuracy: 0.9388 Epoch 30/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0233 - accuracy: 0.9923 - val_loss: 0.3851 - val_accuracy: 0.9454 Epoch 31/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0251 - accuracy: 0.9917 - val_loss: 0.3099 - val_accuracy: 0.9481 Epoch 32/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0255 - accuracy: 0.9915 - val_loss: 0.3926 - val_accuracy: 0.9420 Epoch 33/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0231 - accuracy: 0.9929 - val_loss: 0.4102 - val_accuracy: 0.9474 Epoch 34/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0293 - accuracy: 0.9904 - val_loss: 0.5148 - val_accuracy: 0.9270 Epoch 35/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0253 - accuracy: 0.9915 - val_loss: 0.4500 - val_accuracy: 0.9320 Epoch 36/50 179/180 [============================>.] - ETA: 0s - loss: 0.0228 - accuracy: 0.9927Restoring model weights from the end of the best epoch: 26. 
180/180 [==============================] - 1s 5ms/step - loss: 0.0228 - accuracy: 0.9927 - val_loss: 0.3268 - val_accuracy: 0.9469 Epoch 00036: early stopping training -> tsc_lenet_11x11_more_filters Epoch 1/50 180/180 [==============================] - 1s 5ms/step - loss: 1.3019 - accuracy: 0.6263 - val_loss: 0.6597 - val_accuracy: 0.8061 Epoch 2/50 180/180 [==============================] - 1s 5ms/step - loss: 0.4175 - accuracy: 0.8734 - val_loss: 0.4244 - val_accuracy: 0.8594 Epoch 3/50 180/180 [==============================] - 1s 5ms/step - loss: 0.2825 - accuracy: 0.9128 - val_loss: 0.3507 - val_accuracy: 0.8868 Epoch 4/50 180/180 [==============================] - 1s 5ms/step - loss: 0.2100 - accuracy: 0.9336 - val_loss: 0.2982 - val_accuracy: 0.9163 Epoch 5/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1788 - accuracy: 0.9433 - val_loss: 0.3357 - val_accuracy: 0.9095 Epoch 6/50 180/180 [==============================] - 1s 6ms/step - loss: 0.1498 - accuracy: 0.9532 - val_loss: 0.2634 - val_accuracy: 0.9329 Epoch 7/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1332 - accuracy: 0.9577 - val_loss: 0.2947 - val_accuracy: 0.9166 Epoch 8/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1229 - accuracy: 0.9597 - val_loss: 0.3154 - val_accuracy: 0.9111 Epoch 9/50 180/180 [==============================] - 1s 4ms/step - loss: 0.1127 - accuracy: 0.9634 - val_loss: 0.3136 - val_accuracy: 0.9177 Epoch 10/50 180/180 [==============================] - 1s 6ms/step - loss: 0.1063 - accuracy: 0.9646 - val_loss: 0.3646 - val_accuracy: 0.9247 Epoch 11/50 180/180 [==============================] - 1s 5ms/step - loss: 0.1029 - accuracy: 0.9669 - val_loss: 0.2995 - val_accuracy: 0.9227 Epoch 12/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0911 - accuracy: 0.9697 - val_loss: 0.3848 - val_accuracy: 0.9261 Epoch 13/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0888 - 
accuracy: 0.9718 - val_loss: 0.3058 - val_accuracy: 0.9256 Epoch 14/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0864 - accuracy: 0.9722 - val_loss: 0.4472 - val_accuracy: 0.9125 Epoch 15/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0810 - accuracy: 0.9739 - val_loss: 0.3482 - val_accuracy: 0.9261 Epoch 16/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0720 - accuracy: 0.9759 - val_loss: 0.2620 - val_accuracy: 0.9390 Epoch 17/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0743 - accuracy: 0.9750 - val_loss: 0.3392 - val_accuracy: 0.9274 Epoch 18/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0662 - accuracy: 0.9785 - val_loss: 0.2697 - val_accuracy: 0.9388 Epoch 19/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0679 - accuracy: 0.9768 - val_loss: 0.2397 - val_accuracy: 0.9458 Epoch 20/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0626 - accuracy: 0.9798 - val_loss: 0.3763 - val_accuracy: 0.9322 Epoch 21/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0577 - accuracy: 0.9802 - val_loss: 0.3685 - val_accuracy: 0.9324 Epoch 22/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0571 - accuracy: 0.9816 - val_loss: 0.3515 - val_accuracy: 0.9261 Epoch 23/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0653 - accuracy: 0.9792 - val_loss: 0.2571 - val_accuracy: 0.9417 Epoch 24/50 180/180 [==============================] - 1s 6ms/step - loss: 0.0535 - accuracy: 0.9821 - val_loss: 0.2390 - val_accuracy: 0.9492 Epoch 25/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0558 - accuracy: 0.9812 - val_loss: 0.2851 - val_accuracy: 0.9444 Epoch 26/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0546 - accuracy: 0.9819 - val_loss: 0.3254 - val_accuracy: 0.9358 Epoch 27/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0550 
- accuracy: 0.9819 - val_loss: 0.3077 - val_accuracy: 0.9397 Epoch 28/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0494 - accuracy: 0.9832 - val_loss: 0.2992 - val_accuracy: 0.9472 Epoch 29/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0523 - accuracy: 0.9837 - val_loss: 0.3086 - val_accuracy: 0.9488 Epoch 30/50 180/180 [==============================] - 1s 5ms/step - loss: 0.0482 - accuracy: 0.9840 - val_loss: 0.3577 - val_accuracy: 0.9270 Epoch 31/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0553 - accuracy: 0.9827 - val_loss: 0.3010 - val_accuracy: 0.9361 Epoch 32/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0424 - accuracy: 0.9856 - val_loss: 0.3712 - val_accuracy: 0.9354 Epoch 33/50 180/180 [==============================] - 1s 4ms/step - loss: 0.0541 - accuracy: 0.9824 - val_loss: 0.3092 - val_accuracy: 0.9485 Epoch 34/50 169/180 [===========================>..] - ETA: 0s - loss: 0.0463 - accuracy: 0.9847Restoring model weights from the end of the best epoch: 24. 180/180 [==============================] - 1s 4ms/step - loss: 0.0450 - accuracy: 0.9851 - val_loss: 0.3678 - val_accuracy: 0.9372 Epoch 00034: early stopping
# Inspect the training histories collected above (one entry per trained model).
history.keys()
dict_keys(['tsc_baseline', 'tsc_lenet_batch-norm_dropout', 'tsc_lenet_always_augment', 'tsc_lenet_more_filters', 'tsc_lenet_7x7_more_filters', 'tsc_lenet_11x11_more_filters'])
# Training summary/plots generation (collected in history dict)
from functools import reduce  # kept: may still be used by other notebook cells
# `history` maps model name -> list of Keras History objects (one per
# training session).  For each metric we concatenate the per-epoch values
# of all sessions into one flat list, split into train/valid series.
# Fixes vs. original: the comprehension loop variable no longer shadows
# the `history` dict (renamed `sessions`), and the quadratic
# reduce-based list concatenation is replaced by a flattening
# comprehension.
summaries = {
    nn: {
        metric: {
            "train": [v for h in sessions for v in h.history[metric]],
            "valid": [v for h in sessions for v in h.history[f"val_{metric}"]],
        }
        for metric in ["loss", "accuracy"]
    }
    for nn, sessions in history.items()
}
# One figure per network: two side-by-side subplots (loss, accuracy),
# each showing the train vs. validation curve over all concatenated
# training epochs; the figure is saved under ./output.
for nn, summary in summaries.items():
plt.figure(figsize=(16, 6))
# Pair each metric ("loss", "accuracy") with its own subplot axis.
for metric, ax in zip(
summary.keys(), [plt.subplot(1, 2, idx) for idx in range(1, 3)]
):
# Plot both the "train" and the "valid" series of this metric.
for set_label, values in summary[metric].items():
ax.plot(values, label=f"{set_label} {metric}")
ax.set_xlabel("epochs")
ax.grid(visible=True)
ax.set_title(f"{nn} {metric}")
if metric == "accuracy":
# Zoom on the interesting range and mark the 0.93 target
# accuracy with a dashed reference line.
ax.set_ylim((0.7, 1))
ax.hlines(
0.93,
0,
len(values),
color="red",
linestyle="dashed",
label="target acc.0.93",
)
else:
ax.set_ylim((0, 2))
ax.legend()
# Saving the figure to a file (png)
plt.savefig(f"./output/{nn}_training_session.png")
# Accuracy evolution per epoch, per model: one comparison figure with
# the validation accuracy curve of every trained model, plus the 0.93
# target accuracy as a dashed reference line.
plt.figure(figsize=(16, 6))
max_epochs = 1
for model_name, results in summaries.items():
validation_accuracy = results["accuracy"]["valid"]
# Track the longest training run so the x-axis covers every model.
max_epochs = max(1+len(validation_accuracy), max_epochs)
plt.xticks(range(1, max_epochs))
plt.plot(range(1, 1+len(validation_accuracy)), validation_accuracy, label=f"{model_name}")
plt.ylim((0.7, 1))
plt.hlines(
0.93,
0,
max_epochs,
color="black",
linestyle="dashed",
label="Target accuracy 0.93",
)
plt.xlabel("Epochs")
plt.ylabel("Accuracy on Valid.")
plt.xticks(rotation=-45)
plt.grid()
plt.legend()
plt.savefig(f"./output/models_comparison.png")
# TODO: Add a visualization of a monotonous increase keeping only
# epochs where the accuracy on the validation increases
# compared to last best reached accuracy
# Accuracy on train, validation (and test) sets
from traffic_sign_classifier.utils import accuracy, accuracy_classes
# Report each model's accuracy on the three datasets.  Consistency fix:
# use the public `name` attribute (as the table/selection cells do)
# instead of the private `_name`.
for _, network in networks.items():
    print(f"\n{network.name}:\n")
    print(f" Accuracy on the Train set = {accuracy(network.pred_model(X_train), y_train)}")
    print(f" Accuracy on the Valid. set = {accuracy(network.pred_model(X_valid), y_valid)}")
    # Test set accuracy is reported for the write-up only; model
    # selection uses the validation set.
    print(f" Accuracy on the Test set = {accuracy(network.pred_model(X_test), y_test)}")
tsc_baseline: Accuracy on the Train set = 0.9970688812896922 Accuracy on the Valid. set = 0.9315192743764172 Accuracy on the Test set = 0.917735550277118 tsc_lenet_batch-norm_dropout: Accuracy on the Train set = 0.9986493864766229 Accuracy on the Valid. set = 0.9344671201814059 Accuracy on the Test set = 0.9209817893903405 tsc_lenet_always_augment: Accuracy on the Train set = 0.9949136469438777 Accuracy on the Valid. set = 0.9360544217687075 Accuracy on the Test set = 0.9153602533650039 tsc_lenet_more_filters: Accuracy on the Train set = 0.9984482312710136 Accuracy on the Valid. set = 0.9358276643990929 Accuracy on the Test set = 0.9271575613618369 tsc_lenet_7x7_more_filters: Accuracy on the Train set = 0.9975286646167993 Accuracy on the Valid. set = 0.9587301587301588 Accuracy on the Test set = 0.934916864608076 tsc_lenet_11x11_more_filters: Accuracy on the Train set = 0.9972987729532458 Accuracy on the Valid. set = 0.9492063492063492 Accuracy on the Test set = 0.9288202692003167
# md output generation
from IPython.display import Markdown, display
def render_acc_cell(acc, threshold=0.93):
    """Render an accuracy value as an HTML snippet with a colored background.

    :param acc: accuracy value in [0, 1]
    :param threshold: accuracies >= threshold are rendered on green,
                      the others on red
    :return: an HTML ``<p>`` snippet with the accuracy formatted to 3 decimals
    """
    # Fix: CSS declarations use `property: value`; the original emitted
    # the invalid `opacity=...`, which browsers silently ignore.
    if acc >= threshold:
        return f'<p style="background:green; opacity:0.1"> {acc:.3f} </p>'
    return f'<p style="background:red; opacity:0.3"> {acc:.3f} </p>'
# Build a markdown results table: one row per model, one accuracy
# column per dataset; only the validation column (used for model
# selection) gets the colored pass/fail cell rendering.
datasets_dict = {
    "orig. train": (X_train, y_train),
    "valid": (X_valid, y_valid),
    "test": (X_test, y_test),
}
keys = ["orig. train", "valid", "test"]
# Header line plus the markdown alignment separator (one column per
# dataset, plus the leading "Model" column).
header = "\n".join([
    "| Model | acc. " + "| acc. ".join(keys),
    "|:-" + "-:|:-" * len(datasets_dict) + "-:|",
])
rows = []
for network in networks.values():
    cells = []
    for dataset_label in keys:
        X, y = datasets_dict[dataset_label]
        acc = accuracy(network.pred_model(X), y)
        if dataset_label == "valid":
            cells.append(f"{render_acc_cell(acc)}")
        else:
            cells.append(f"{acc:.3f}")
    rows.append(f"| {network.name} |" + "|".join(cells))
table = "\n".join([header] + rows)
display(Markdown(table))
print(table)
| Model | acc. orig. train | acc. valid | acc. test |
|---|---|---|---|
| tsc_baseline | 0.997 | 0.932 |
0.918 |
| tsc_lenet_batch-norm_dropout | 0.999 | 0.934 |
0.921 |
| tsc_lenet_always_augment | 0.995 | 0.936 |
0.915 |
| tsc_lenet_more_filters | 0.998 | 0.936 |
0.927 |
| tsc_lenet_7x7_more_filters | 0.998 | 0.959 |
0.935 |
| tsc_lenet_11x11_more_filters | 0.997 | 0.949 |
0.929 |
| Model | acc. orig. train| acc. valid| acc. test |:--:|:--:|:--:|:--:| | tsc_baseline |0.997|<p style="background:green; opacity=0.1"> 0.932 </p>|0.918 | tsc_lenet_batch-norm_dropout |0.999|<p style="background:green; opacity=0.1"> 0.934 </p>|0.921 | tsc_lenet_always_augment |0.995|<p style="background:green; opacity=0.1"> 0.936 </p>|0.915 | tsc_lenet_more_filters |0.998|<p style="background:green; opacity=0.1"> 0.936 </p>|0.927 | tsc_lenet_7x7_more_filters |0.998|<p style="background:green; opacity=0.1"> 0.959 </p>|0.935 | tsc_lenet_11x11_more_filters |0.997|<p style="background:green; opacity=0.1"> 0.949 </p>|0.929
# Select the model based on the validation perf.
# Rank the models by validation accuracy, best first.  `sorted()`
# accepts a generator directly; the intermediate list of the original
# was unnecessary.
model_acc_sorted = sorted(
    (
        (model, accuracy(network.pred_model(X_valid), y_valid))
        for model, network in networks.items()
    ),
    key=lambda x: x[1],
    reverse=True,
)
model_acc_sorted
[('tsc_lenet_7x7_more_filters', 0.9587301587301588),
('tsc_lenet_11x11_more_filters', 0.9492063492063492),
('tsc_lenet_always_augment', 0.9360544217687075),
('tsc_lenet_more_filters', 0.9358276643990929),
('tsc_lenet_batch-norm_dropout', 0.9344671201814059),
('tsc_baseline', 0.9315192743764172)]
# TODO: rename to best model
# Pick the top-ranked model (highest validation accuracy).
model = networks[model_acc_sorted[0][0]]
print(f"Selected model -> {model.name}")  # fixed "Seletected" typo
# Save model (to integrate to LeNet class)
model.pred_model.save(os.path.join(output_dir, f"selected_{model.name}_{run_id}"))
Seletected model -> tsc_lenet_7x7_more_filters WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model. INFO:tensorflow:Assets written to: ./output/selected_tsc_lenet_7x7_more_filters_20220227-122050/assets
# Generate (selected) model arch/layers visualization
from keras.utils.vis_utils import plot_model
# Render the training-time graph to PNG; shapes shown, layer names hidden.
plot_model(model.model, to_file=os.path.join(output_dir, "selected_model_train.png"), show_shapes=True, show_layer_names=False)
# Render the inference-time (prediction) graph the same way; as the last
# expression of the cell, its return value is also displayed inline.
plot_model(model.pred_model, to_file=os.path.join(output_dir, "selected_model_pred.png"), show_shapes=True, show_layer_names=False)
# model.predict(X_valid)[:2]
def misclassified(model, images, gt):
    """Return the mis-classified item indices zipped with the predicted
    and the correct ground-truth label.

    :param model: model to use for prediction (must expose ``predict``)
    :param images: images as a np.array with shape (N, 32, 32, 3)
    :param gt: labels as integers of shape (N,)
    :return: np.array of shape (M, 3) with rows
             (index, predicted_label, gt_label); M may be 0
    """
    # Highest-scoring class per sample.
    predictions = np.argmax(model.predict(images), axis=1)
    misclassified_indices = np.argwhere(predictions != gt).flatten()
    # column_stack keeps a (0, 3) shape when nothing is misclassified,
    # so callers can always index the result as result[:, 0] (the
    # original zip-based construction collapsed to shape (0,)).
    return np.column_stack(
        (
            misclassified_indices,
            predictions[misclassified_indices],
            gt[misclassified_indices],
        )
    )
# Collect the validation-set misclassifications of the selected model
# and report how many there are.
valid_misclassified = misclassified(model, X_valid, y_valid)
print(len(valid_misclassified))
182
from traffic_sign_classifier.utils import load_signnames
# Map class id -> human-readable sign name (from signnames.csv).
index2label = load_signnames("./traffic-signs-data/signnames.csv")
from collections import defaultdict
# Group the misclassified items both by predicted class and by
# ground-truth class.  `defaultdict(list)` is the idiomatic form of
# the original `defaultdict(lambda: [])`.
valid_misclassified_by_pred = defaultdict(list)  # misclassified grouped by predicted class
valid_misclassified_by_gt = defaultdict(list)  # misclassified grouped by gt class
for index, prediction, gt in valid_misclassified:
    valid_misclassified_by_pred[prediction].append((index, gt))
    valid_misclassified_by_gt[gt].append((index, prediction))
    # print(f"{index} pred.: {index2label.get(prediction)}, gt: {index2label.get(gt)}")
# Sorting the misclassifications by category (highest error count first).
top_mis_nbr = 10  # number of the top misclassified categories to summarize
# dict.items() already yields (category, misclassifications) pairs; the
# original's intermediate list comprehension was unnecessary.
sorted_misclassified = sorted(
    valid_misclassified_by_gt.items(), key=lambda x: len(x[1]), reverse=True
)
for category, misclassification in sorted_misclassified[:top_mis_nbr]:
    print(f"\n(id: {category}) {index2label.get(category)}, {len(misclassification)} incorrectly classified:")
    # Grouping this category's misclassifications by predicted class.
    grp = defaultdict(list)
    for item, predicted in misclassification:
        grp[predicted].append(item)
    for prediction, items in grp.items():
        # NOTE(review): `class2label` comes from an earlier cell and is
        # presumably the same id->name mapping as `index2label` — confirm.
        print(f"* as {class2label.get(prediction, prediction)} -> {len(items)} time(s)!")
(id: 16) Vehicles over 3.5 metric tons prohibited, 25 incorrectly classified: * as End of no passing -> 22 time(s)! * as No passing -> 3 time(s)! (id: 25) Road work, 20 incorrectly classified: * as General caution -> 2 time(s)! * as Right-of-way at the next intersection -> 5 time(s)! * as Beware of ice/snow -> 4 time(s)! * as Wild animals crossing -> 8 time(s)! * as Speed limit (50km/h) -> 1 time(s)! (id: 7) Speed limit (100km/h), 16 incorrectly classified: * as Speed limit (120km/h) -> 16 time(s)! (id: 41) End of no passing, 15 incorrectly classified: * as End of all speed and passing limits -> 14 time(s)! * as End of speed limit (80km/h) -> 1 time(s)! (id: 21) Double curve, 14 incorrectly classified: * as Road narrows on the right -> 4 time(s)! * as Bumpy road -> 2 time(s)! * as Right-of-way at the next intersection -> 4 time(s)! * as Slippery road -> 4 time(s)! (id: 20) Dangerous curve to the right, 14 incorrectly classified: * as Right-of-way at the next intersection -> 3 time(s)! * as Bicycles crossing -> 2 time(s)! * as Road work -> 2 time(s)! * as Priority road -> 2 time(s)! * as General caution -> 1 time(s)! * as Slippery road -> 2 time(s)! * as Children crossing -> 1 time(s)! * as Road narrows on the right -> 1 time(s)! (id: 40) Roundabout mandatory, 13 incorrectly classified: * as Speed limit (30km/h) -> 12 time(s)! * as End of no passing -> 1 time(s)! (id: 27) Pedestrians, 8 incorrectly classified: * as General caution -> 8 time(s)! (id: 30) Beware of ice/snow, 7 incorrectly classified: * as Dangerous curve to the right -> 2 time(s)! * as Right-of-way at the next intersection -> 1 time(s)! * as Road work -> 2 time(s)! * as Double curve -> 2 time(s)! (id: 1) Speed limit (30km/h), 6 incorrectly classified: * as Speed limit (50km/h) -> 3 time(s)! * as Speed limit (70km/h) -> 2 time(s)! * as Speed limit (80km/h) -> 1 time(s)!
Visualizing the most frequently misclassified categories/classes
# most frequently missed categories
most_misclassified_categs, _ = zip(*sorted_misclassified)
# Render a sample grid of the misclassified validation images for the
# top categories; figures are written under ./output/misclassified.
# NOTE(review): `grid_visu` and `class2label` are defined in an earlier
# cell; `class2label` presumably matches `index2label` — confirm.
grid_visu(X_valid[valid_misclassified[:, 0]], labels=y_valid[valid_misclassified[:, 0]],
sample_size=8,
categories_per_fig=10,
categories= most_misclassified_categs[:top_mis_nbr], label_to_name=class2label,
output_dir_path="./output/misclassified")
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find signnames.csv useful as it contains mappings from the class id (integer) to the actual sign name.
from typing import List
from glob import glob
import cv2
# glob() already returns a list of matching paths; the wrapping list
# comprehension of the original was a no-op.
test_paths = glob("./examples/test_sample/*.png")
# For convenience and faster batched prediction we resize the new
# images to 32x32 (32x32 resizing is also part of the preprocessing
# applied at prediction time!)
def extract_test_categ(image_path) -> int:
    """Extract the test category/class id from an image path.

    Naming convention for the files: [class]_[id].extension

    :param image_path: path to a test image file
    :return: the class id parsed from the file name
    """
    # basename() already strips the directory part; the original's extra
    # os.path.split() on the basename was redundant.
    return int(os.path.basename(image_path).split("_")[0])
def load_data(paths: List[str], label_extractor=extract_test_categ):
    """Yield (path, image, label) triples for a list of image paths.

    Each image is read with OpenCV, converted BGR -> RGB and resized to
    32x32; the label is derived from the file name via `label_extractor`.
    Paths are processed in sorted order.
    """
    for image_path in sorted(paths):
        bgr = cv2.imread(image_path)
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        resized = cv2.resize(rgb, (32, 32))
        yield image_path, resized, label_extractor(image_path)
# sample_test_image = cv2.cvtColor(cv2.imread(test_paths[-1]), cv2.COLOR_BGR2RGB)
# sample_test_image.reshape( [1] , sample_test_image.shape()
# As the resizing and scaling
# Sanity check: run one raw (un-resized) image through the model. The
# model contains resizing/rescaling preprocessing layers (see the layer
# listing below), so only the batch dimension needs to be added here.
model.predict(np.expand_dims(cv2.cvtColor(cv2.imread(test_paths[-1]), cv2.COLOR_BGR2RGB), axis=0))
array([[1.10844807e-29, 6.52402974e-18, 2.46341705e-20, 1.83302590e-19,
8.39493867e-32, 5.17346349e-20, 1.85125806e-15, 7.71561570e-20,
3.73429188e-26, 5.78403347e-24, 4.28642557e-31, 4.75347876e-17,
4.97479668e-20, 2.31061427e-25, 6.14848508e-30, 7.76564078e-34,
3.00574588e-13, 8.91939295e-27, 2.62466224e-15, 1.06228897e-16,
3.59841348e-14, 2.70087193e-21, 8.64019690e-26, 1.06923798e-14,
1.79996275e-16, 4.56535187e-22, 6.57328573e-20, 5.04163075e-25,
5.83435146e-19, 3.67424612e-29, 7.25427732e-24, 4.68115408e-11,
1.21988218e-24, 6.04605219e-08, 2.07955364e-14, 2.53493171e-10,
1.00940794e-13, 3.89921159e-01, 7.35849368e-11, 1.30308727e-05,
6.10065758e-01, 5.47515939e-12, 1.87036614e-16]], dtype=float32)
# NOTE(review): this call passes a 3-D image (H, W, 3) with no explicit
# batch dimension yet still produces a (1, 43) output -- presumably the
# model's Resizing layer absorbs it; prefer the expand_dims form for
# clarity. TODO confirm.
model.predict(cv2.cvtColor(cv2.imread(test_paths[-1]), cv2.COLOR_BGR2RGB))
<tf.Tensor: shape=(1, 43), dtype=float32, numpy=
array([[1.10845649e-29, 6.52407937e-18, 2.46342674e-20, 1.83302590e-19,
8.39500214e-32, 5.17346349e-20, 1.85126526e-15, 7.71561570e-20,
3.73430606e-26, 5.78405556e-24, 4.28645802e-31, 4.75349662e-17,
4.97481607e-20, 2.31061427e-25, 6.14848508e-30, 7.76569955e-34,
3.00576892e-13, 8.91939295e-27, 2.62466224e-15, 1.06229300e-16,
3.59840636e-14, 2.70089273e-21, 8.64022956e-26, 1.06924222e-14,
1.79997651e-16, 4.56535187e-22, 6.57323533e-20, 5.04164998e-25,
5.83439644e-19, 3.67427440e-29, 7.25427732e-24, 4.68117246e-11,
1.21989145e-24, 6.04605219e-08, 2.07955364e-14, 2.53493171e-10,
1.00941370e-13, 3.89921159e-01, 7.35853531e-11, 1.30308845e-05,
6.10065758e-01, 5.47517023e-12, 1.87037316e-16]], dtype=float32)>
# Load every external test image (sorted by path) and unzip the triples
# into parallel tuples of paths, 32x32 RGB images and ground-truth labels.
# Note: test_paths is rebound here to the sorted order.
test_paths, test_images , test_labels =zip(* load_data(test_paths))
print(len(test_paths))
print(test_paths)
19
('./examples/test_sample/13_1.png', './examples/test_sample/13_2.png', './examples/test_sample/13_3.png', './examples/test_sample/17_1.png', './examples/test_sample/17_2.png', './examples/test_sample/18_1.png', './examples/test_sample/18_2.png', './examples/test_sample/2_1.png', './examples/test_sample/33_1.png', './examples/test_sample/33_2.png', './examples/test_sample/35_1.png', './examples/test_sample/35_2.png', './examples/test_sample/35_3.png', './examples/test_sample/35_4.png', './examples/test_sample/38_1.png', './examples/test_sample/38_2.png', './examples/test_sample/3_1.png', './examples/test_sample/3_2.png', './examples/test_sample/3_3.png')
# Generate md image-set
# Todo center the visualization
# Visualize the external test images grouped per class and save the
# figures under ./output/ext_new (used in the write-up markdown).
grid_visu(test_images, test_labels, sample_size=4, label_to_name=class2label, categories_per_fig=8, output_dir_path="./output/ext_new")
# To move to LeNet (class) as a method
from typing import Iterable
from traffic_sign_classifier.nn.utils import softmax
def predict_paths(paths: Iterable[str] = test_paths, top_k=5):
    """Predict the class of each image path, printing the results.

    :param paths: iterable of image file paths (label encoded in file name)
    :param top_k: if not None, also print the top-k class probabilities
    :yield: (path, ground-truth label, predicted category id) per image
    """
    for path, image, label in load_data(paths):
        # add the batch dimension expected by model.predict
        image = image.reshape([1] + list(image.shape))
        prediction = (model.predict(image)).flatten()
        pred_categ = np.argmax(prediction)
        print(f" {path}: gt: {label} -> predicted category ({pred_categ}:{class2label[pred_categ]}) ")
        if top_k is not None:
            # BUGFIX: model.predict already returns softmax probabilities
            # (the raw outputs sum to 1 -- see the single-image cell above),
            # so re-applying softmax here flattened every score towards the
            # uniform 1/43 ~= 0.023 baseline. Report the probabilities
            # directly instead.
            for category in np.argsort(prediction)[::-1][:top_k]:
                print(f" {category} {prediction[category]} {class2label[category]} -> {category}")
        yield path, label, pred_categ
# Run prediction over every external image; keep paths, ground truth and
# predicted categories as parallel tuples for the accuracy/markdown cells.
ext_paths, ext_gt, ext_pred = zip(*predict_paths(test_paths))
./examples/test_sample/13_1.png: gt: 13 -> predicted category (13:Yield)
13 0.06078681722283363 Yield -> 13
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/13_2.png: gt: 13 -> predicted category (13:Yield)
13 0.06078681722283363 Yield -> 13
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/13_3.png: gt: 13 -> predicted category (13:Yield)
13 0.060780640691518784 Yield -> 13
12 0.022364670410752296 Priority road -> 12
42 0.02236230857670307 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236230857670307 End of no passing -> 41
40 0.02236230857670307 Roundabout mandatory -> 40
./examples/test_sample/17_1.png: gt: 17 -> predicted category (17:No entry)
17 0.06078677996993065 No entry -> 17
10 0.022362230345606804 No passing for vehicles over 3.5 metric tons -> 10
9 0.022362224757671356 No passing -> 9
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
./examples/test_sample/17_2.png: gt: 17 -> predicted category (17:No entry)
17 0.060786813497543335 No entry -> 17
9 0.022362224757671356 No passing -> 9
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
./examples/test_sample/18_1.png: gt: 18 -> predicted category (11:Right-of-way at the next intersection)
11 0.05235183238983154 Right-of-way at the next intersection -> 11
28 0.02549450471997261 Children crossing -> 28
0 0.022746052592992783 Speed limit (20km/h) -> 0
31 0.022578010335564613 Wild animals crossing -> 31
18 0.022546695545315742 General caution -> 18
./examples/test_sample/18_2.png: gt: 18 -> predicted category (18:General caution)
18 0.06078681722283363 General caution -> 18
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/2_1.png: gt: 2 -> predicted category (2:Speed limit (50km/h))
2 0.06077960133552551 Speed limit (50km/h) -> 2
38 0.022364210337400436 Keep right -> 38
1 0.022363174706697464 Speed limit (30km/h) -> 1
4 0.022362343966960907 Speed limit (70km/h) -> 4
40 0.02236233465373516 Roundabout mandatory -> 40
./examples/test_sample/33_1.png: gt: 33 -> predicted category (40:Roundabout mandatory)
40 0.04166610166430473 Roundabout mandatory -> 40
37 0.033214882016181946 Go straight or left -> 37
39 0.022564182057976723 Keep left -> 39
42 0.0225638709962368 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.0225638709962368 End of no passing -> 41
./examples/test_sample/33_2.png: gt: 33 -> predicted category (33:Turn right ahead)
33 0.06078676879405975 Turn right ahead -> 33
40 0.02236223593354225 Roundabout mandatory -> 40
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/35_1.png: gt: 35 -> predicted category (37:Go straight or left)
37 0.06022392585873604 Go straight or left -> 37
40 0.022587168961763382 Roundabout mandatory -> 40
35 0.022371092811226845 Ahead only -> 35
11 0.02237045392394066 Right-of-way at the next intersection -> 11
42 0.022370444610714912 End of no passing by vehicles over 3.5 metric tons -> 42
./examples/test_sample/35_2.png: gt: 35 -> predicted category (40:Roundabout mandatory)
40 0.037453267723321915 Roundabout mandatory -> 40
37 0.03366865590214729 Go straight or left -> 37
39 0.024617116898298264 Keep left -> 39
20 0.022813424468040466 Dangerous curve to the right -> 20
35 0.022613173350691795 Ahead only -> 35
./examples/test_sample/35_3.png: gt: 35 -> predicted category (35:Ahead only)
35 0.06078677996993065 Ahead only -> 35
37 0.02236223593354225 Go straight or left -> 37
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
./examples/test_sample/35_4.png: gt: 35 -> predicted category (35:Ahead only)
35 0.06078681722283363 Ahead only -> 35
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/38_1.png: gt: 38 -> predicted category (38:Keep right)
38 0.06078681722283363 Keep right -> 38
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/38_2.png: gt: 38 -> predicted category (38:Keep right)
38 0.06078681722283363 Keep right -> 38
42 0.02236221916973591 End of no passing by vehicles over 3.5 metric tons -> 42
41 0.02236221916973591 End of no passing -> 41
40 0.02236221916973591 Roundabout mandatory -> 40
39 0.02236221916973591 Keep left -> 39
./examples/test_sample/3_1.png: gt: 3 -> predicted category (3:Speed limit (60km/h))
3 0.0605943463742733 Speed limit (60km/h) -> 3
5 0.022416861727833748 Speed limit (80km/h) -> 5
40 0.02238371968269348 Roundabout mandatory -> 40
2 0.022367872297763824 Speed limit (50km/h) -> 2
33 0.02236553281545639 Turn right ahead -> 33
./examples/test_sample/3_2.png: gt: 3 -> predicted category (2:Speed limit (50km/h))
2 0.04643229395151138 Speed limit (50km/h) -> 2
3 0.028793878853321075 Speed limit (60km/h) -> 3
40 0.023117557168006897 Roundabout mandatory -> 40
5 0.022674908861517906 Speed limit (80km/h) -> 5
35 0.022543735802173615 Ahead only -> 35
./examples/test_sample/3_3.png: gt: 3 -> predicted category (3:Speed limit (60km/h))
3 0.05437428504228592 Speed limit (60km/h) -> 3
5 0.02490927092730999 Speed limit (80km/h) -> 5
2 0.02263895981013775 Speed limit (50km/h) -> 2
1 0.022500071674585342 Speed limit (30km/h) -> 1
7 0.02246822789311409 Speed limit (100km/h) -> 7
# Echo the (sorted) external test image paths.
print(test_paths)
('./examples/test_sample/13_1.png', './examples/test_sample/13_2.png', './examples/test_sample/13_3.png', './examples/test_sample/17_1.png', './examples/test_sample/17_2.png', './examples/test_sample/18_1.png', './examples/test_sample/18_2.png', './examples/test_sample/2_1.png', './examples/test_sample/33_1.png', './examples/test_sample/33_2.png', './examples/test_sample/35_1.png', './examples/test_sample/35_2.png', './examples/test_sample/35_3.png', './examples/test_sample/35_4.png', './examples/test_sample/38_1.png', './examples/test_sample/38_2.png', './examples/test_sample/3_1.png', './examples/test_sample/3_2.png', './examples/test_sample/3_3.png')
# Prediction to md
# TODO: mv as utils helper function
from typing import Dict
def render_pred_cell(pred, actual, class2label: Dict):
    """Render an HTML table cell for a prediction.

    The cell background is green when the prediction matches the ground
    truth and red otherwise.

    :param pred: predicted class id
    :param actual: ground-truth class id
    :param class2label: mapping class id -> human-readable sign name
    :return: HTML <p> snippet for embedding in a markdown table
    """
    color = "green" if pred == actual else "red"
    # BUGFIX: inline CSS uses ':' between property and value; the original
    # "opacity=0.3" was invalid CSS and silently ignored by browsers.
    return f'<p style="background:{color}; opacity:0.3"> {pred}, {class2label.get(pred, pred)} </p>'
# Build a markdown table: one row per external image, showing the ground
# truth and a colour-coded prediction cell, sorted by image path.
header = "\n".join(["| Image | Ground truth | Prediction | ","|:-----:|:------------:|:----------:|"])
rows = []
for path_, gt_, pred_ in sorted(zip(ext_paths, ext_gt, ext_pred)):
rows.append(f"| {path_} | {gt_}, {class2label[gt_]}| {render_pred_cell(pred_, gt_, class2label)}|")
table = "\n".join([header]+rows)
# `display`/`Markdown` come from IPython (available in the notebook runtime).
display(Markdown(table))
print(table)
| Image | Ground truth | Prediction |
|---|---|---|
| ./examples/test_sample/13_1.png | 13, Yield | 13, Yield |
| ./examples/test_sample/13_2.png | 13, Yield | 13, Yield |
| ./examples/test_sample/13_3.png | 13, Yield | 13, Yield |
| ./examples/test_sample/17_1.png | 17, No entry | 17, No entry |
| ./examples/test_sample/17_2.png | 17, No entry | 17, No entry |
| ./examples/test_sample/18_1.png | 18, General caution | 11, Right-of-way at the next intersection |
| ./examples/test_sample/18_2.png | 18, General caution | 18, General caution |
| ./examples/test_sample/2_1.png | 2, Speed limit (50km/h) | 2, Speed limit (50km/h) |
| ./examples/test_sample/33_1.png | 33, Turn right ahead | 40, Roundabout mandatory |
| ./examples/test_sample/33_2.png | 33, Turn right ahead | 33, Turn right ahead |
| ./examples/test_sample/35_1.png | 35, Ahead only | 37, Go straight or left |
| ./examples/test_sample/35_2.png | 35, Ahead only | 40, Roundabout mandatory |
| ./examples/test_sample/35_3.png | 35, Ahead only | 35, Ahead only |
| ./examples/test_sample/35_4.png | 35, Ahead only | 35, Ahead only |
| ./examples/test_sample/38_1.png | 38, Keep right | 38, Keep right |
| ./examples/test_sample/38_2.png | 38, Keep right | 38, Keep right |
| ./examples/test_sample/3_1.png | 3, Speed limit (60km/h) | 3, Speed limit (60km/h) |
| ./examples/test_sample/3_2.png | 3, Speed limit (60km/h) | 2, Speed limit (50km/h) |
| ./examples/test_sample/3_3.png | 3, Speed limit (60km/h) | 3, Speed limit (60km/h) |
| Image | Ground truth | Prediction | |:-----:|:------------:|:----------:| | ./examples/test_sample/13_1.png | 13, Yield| <p style="background:green; opacity=0.3"> 13, Yield </p>| | ./examples/test_sample/13_2.png | 13, Yield| <p style="background:green; opacity=0.3"> 13, Yield </p>| | ./examples/test_sample/13_3.png | 13, Yield| <p style="background:green; opacity=0.3"> 13, Yield </p>| | ./examples/test_sample/17_1.png | 17, No entry| <p style="background:green; opacity=0.3"> 17, No entry </p>| | ./examples/test_sample/17_2.png | 17, No entry| <p style="background:green; opacity=0.3"> 17, No entry </p>| | ./examples/test_sample/18_1.png | 18, General caution| <p style="background:red; opacity=0.3"> 11, Right-of-way at the next intersection </p>| | ./examples/test_sample/18_2.png | 18, General caution| <p style="background:green; opacity=0.3"> 18, General caution </p>| | ./examples/test_sample/2_1.png | 2, Speed limit (50km/h)| <p style="background:green; opacity=0.3"> 2, Speed limit (50km/h) </p>| | ./examples/test_sample/33_1.png | 33, Turn right ahead| <p style="background:red; opacity=0.3"> 40, Roundabout mandatory </p>| | ./examples/test_sample/33_2.png | 33, Turn right ahead| <p style="background:green; opacity=0.3"> 33, Turn right ahead </p>| | ./examples/test_sample/35_1.png | 35, Ahead only| <p style="background:red; opacity=0.3"> 37, Go straight or left </p>| | ./examples/test_sample/35_2.png | 35, Ahead only| <p style="background:red; opacity=0.3"> 40, Roundabout mandatory </p>| | ./examples/test_sample/35_3.png | 35, Ahead only| <p style="background:green; opacity=0.3"> 35, Ahead only </p>| | ./examples/test_sample/35_4.png | 35, Ahead only| <p style="background:green; opacity=0.3"> 35, Ahead only </p>| | ./examples/test_sample/38_1.png | 38, Keep right| <p style="background:green; opacity=0.3"> 38, Keep right </p>| | ./examples/test_sample/38_2.png | 38, Keep right| <p style="background:green; opacity=0.3"> 38, Keep right </p>| | 
./examples/test_sample/3_1.png | 3, Speed limit (60km/h)| <p style="background:green; opacity=0.3"> 3, Speed limit (60km/h) </p>| | ./examples/test_sample/3_2.png | 3, Speed limit (60km/h)| <p style="background:red; opacity=0.3"> 2, Speed limit (50km/h) </p>| | ./examples/test_sample/3_3.png | 3, Speed limit (60km/h)| <p style="background:green; opacity=0.3"> 3, Speed limit (60km/h) </p>|
from traffic_sign_classifier.utils import accuracy_classes
# Accuracy of the model on the newly downloaded (external) sign images.
correct_ext = np.sum(np.array(ext_pred)==np.array(ext_gt))
acc_ext = accuracy_classes(np.array(ext_pred), np.array(ext_gt))
print(f"The model was able to correctly guess {correct_ext} of the {len(ext_pred)} traffic signs, which gives an accuracy of {acc_ext:.2f}")
The model was able to correctly guess 14 of the 19 traffic signs, which gives an accuracy of 0.74
# Same accuracy expressed as a percentage. NOTE(review): the argument
# order differs from the cell above -- harmless if accuracy_classes only
# counts element-wise matches; confirm against its implementation.
print (f" accuracy on the new sample -> {100*accuracy_classes(np.array(ext_gt), np.array(ext_pred)):.2f}%")
accuracy on the new sample -> 73.68%
# predict the images (in batch)
# Batch-predict all external images at once, then take the per-image
# top-5 class probabilities/indices.
prediction = model.predict(np.array(test_images))
# NOTE(review): model.predict appears to already emit softmax
# probabilities (earlier outputs sum to 1), so re-applying softmax here
# flattens the scores -- confirm and consider using `prediction` directly.
top_k = tf.nn.top_k(softmax(prediction), k=5)
ext_probs = top_k.values
ext_classes = top_k.indices
# NOTE(review): this bare zip(...) expression is never consumed (it only
# echoes a zip object in the notebook) -- dead-code candidate.
zip( test_images, *zip(*(map(np.array, zip(ext_probs, ext_classes)))))
<zip at 0x7f00ffc26700>
index = 1
plt.figure(figsize=(16, len(test_images)*2))
# TODO sort by classes (file name)
# For each external image draw two side-by-side subplots: a horizontal
# bar chart of its top-5 class probabilities, and the image itself.
for image_file, test_image, probs, categs in zip(test_paths, test_images, *zip(*map(np.array, zip(ext_probs, ext_classes)))):
ax = plt.subplot(len(test_images), 2, index)
# order the top-5 entries by ascending probability so barh stacks them
# with the most likely class on top
sorted_categs, sorted_probs = zip(*sorted(zip(categs, probs), key=lambda x: x [1]))
categ_labels = [f"(class {item}) -> {class2label.get(item, str(item))}" for item in map(int, sorted_categs)]
ax.barh(range(len(categ_labels))
, sorted_probs, tick_label=categ_labels)
ax.set_title(f"image {image_file}")
ax.grid()
index += 1
# right-hand subplot: the image being classified
ax_image = plt.subplot(len(test_images), 2, index)
ax_image.imshow(test_image)
index += 1
plt.tight_layout()
# TODO save the output
plt.savefig(f"./output/top_5_softmax_probs.png")
# List the layer names of the underlying prediction model (used below to
# select the conv layers whose feature maps we visualize).
for layer in model.pred_model.layers:
print(layer.name)
input_5 resizing rescaling conv2d_8 batch_normalization_6 max_pooling2d_8 conv2d_9 batch_normalization_7 max_pooling2d_9 flatten_4 dense_12 dense_13 dense_14
from mpl_toolkits.axes_grid1 import ImageGrid
from traffic_sign_classifier.visualization import visualize_feature_map_output
def build_activation_model(model, layer_name):
    """Build a truncated "activation" model for feature-map inspection.

    The returned model maps the original network's input to the output of
    the layer selected by `layer_name`, so running it on an image yields
    that layer's feature maps.

    :param model: original/src nn model (wraps a keras model in .pred_model)
    :param layer_name: name of the layer whose output should be exposed
    """
    base = model.pred_model
    target_output = base.get_layer(layer_name).output
    return tf.keras.Model(base.input, target_output)
# Names of the convolutional layers (prefix "conv") -- these are the
# layers whose activations are visualized below.
conv_layer_names = [layer.name for layer in model.pred_model.layers if layer.name.startswith("conv")]
conv_layer_names
['conv2d_8', 'conv2d_9']
# Echo the external test image paths (cell output follows).
test_paths
('./examples/test_sample/13_1.png',
'./examples/test_sample/13_2.png',
'./examples/test_sample/13_3.png',
'./examples/test_sample/17_1.png',
'./examples/test_sample/17_2.png',
'./examples/test_sample/18_1.png',
'./examples/test_sample/18_2.png',
'./examples/test_sample/2_1.png',
'./examples/test_sample/33_1.png',
'./examples/test_sample/33_2.png',
'./examples/test_sample/35_1.png',
'./examples/test_sample/35_2.png',
'./examples/test_sample/35_3.png',
'./examples/test_sample/35_4.png',
'./examples/test_sample/38_1.png',
'./examples/test_sample/38_2.png',
'./examples/test_sample/3_1.png',
'./examples/test_sample/3_2.png',
'./examples/test_sample/3_3.png')
# For each conv layer, run the external images through a truncated model
# ending at that layer and save the feature-map visualizations under
# ./output/activation.
for index, layer_name in enumerate(conv_layer_names[:]):
visualize_feature_map_output(test_images, test_paths, build_activation_model(model, layer_name),
f"{index}_cov2d", output_dir_path="./output/activation" ,
path_labeler=extract_test_categ,
class2label=class2label)